# Computations
import numpy as np
import pandas as pd
import pickle
# preprocessing
from sklearn import preprocessing
import re
# Visualisation libraries
## Text
from colorama import Fore, Back, Style
from IPython.display import Image, display, Markdown, Latex, clear_output
## progressbar
import progressbar
## plotly
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
import plotly.offline as py
from plotly.subplots import make_subplots
import plotly.express as px
## seaborn
import seaborn as sns
## matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Polygon
from matplotlib.font_manager import FontProperties
import matplotlib.colors as mcolors
# Global matplotlib configuration shared by all figures in this notebook.
# NOTE(review): the 'seaborn-whitegrid' style name was removed in matplotlib >= 3.6
# (renamed to 'seaborn-v0_8-whitegrid') — confirm the installed matplotlib version.
plt.style.use('seaborn-whitegrid')
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['text.color'] = 'k'
# IPython magic: render matplotlib figures inline in the notebook output.
%matplotlib inline
import warnings
# Silence all warnings to keep notebook output clean (this also hides deprecations).
warnings.filterwarnings("ignore")
In this article, we analyze the UCI Statlog (German Credit Data) dataset from Kaggle.com.
The original dataset contains 1000 entries with 20 categorical/symbolic attributes prepared by Prof. Hofmann. In this dataset, each entry represents a person who takes a credit from a bank. Each person is classified as a good or bad credit risk according to the set of attributes. The link to the original dataset can be found below.
It is almost impossible to understand the original dataset due to its complicated system of categories and symbols. Thus, I wrote a small Python script to convert it into a readable CSV file. Several columns are simply ignored, because in my opinion either they are not important or their descriptions are obscure. The selected attributes are:
# Load the (pre-converted) German credit dataset; the first CSV column is the row index.
Path = 'Statlog_Dataset/german_credit_data.csv'
Data = pd.read_csv(Path, index_col=0)
display(Data)
# Classification target column and its title-cased class labels (e.g. 'Good', 'Bad').
Target = 'Risk'
Labels = [x.title() for x in Data[Target].unique()]
def Data_Plot(Inp, Title, W = None):
    """Summarize column completeness of ``Inp`` and plot it as a bar chart.

    For every column, compute its dtype, NaN count, total size and the
    percentage of non-NaN entries, then display a plotly bar chart of the
    percentages colored by dtype.

    Parameters
    ----------
    Inp : pd.DataFrame
        Data to summarize.
    Title : str
        Figure title (rendered bold).
    W : int, optional
        Figure width in pixels; plotly's default width is used when None.

    Returns
    -------
    pd.DataFrame
        Columns: 'Features', 'Data Type', 'Number of NaN Values', 'Size',
        'Percentage'.
    """
    # Build the per-column summary table.
    data_info = Inp.dtypes.astype(str).to_frame(name='Data Type')
    Temp = Inp.isnull().sum().to_frame(name = 'Number of NaN Values')
    data_info = data_info.join(Temp, how='outer')
    data_info['Size'] = Inp.shape[0]
    data_info['Percentage'] = 100 - np.round(100*(data_info['Number of NaN Values']/Inp.shape[0]),2)
    data_info = data_info.reset_index(drop = False).rename(columns = {'index':'Features'})
    # One bar per column; bar height is the share of non-NaN values.
    fig = px.bar(data_info, x= 'Features', y= 'Percentage', color = 'Data Type',
                 text = 'Percentage',
                 color_discrete_sequence = ['PaleGreen', 'LightCyan', 'PeachPuff', 'Pink', 'Plum'],
                 hover_data = data_info.columns)
    fig.update_layout(plot_bgcolor= 'white', legend=dict(x=1.01, y=.5, traceorder="normal",
                      bordercolor="DarkGray", borderwidth=1))
    # FIX: was `if not W == None:` — identity comparison is the correct idiom for None.
    if W is not None:
        fig.update_layout(width = W)
    fig.update_traces(texttemplate= 10*' ' + '%%{text}', textposition='inside')
    fig.update_traces(marker_line_color= 'Black', marker_line_width=1., opacity=1)
    fig.update_layout(title={'text': '<b>' + Title + '<b>', 'x':0.5,
                             'y':0.90, 'xanchor': 'center', 'yanchor': 'top'})
    fig.show()
    return data_info
def dtypes_group(Inp, Dict = False):
    """Group the columns of ``Inp`` by their dtype.

    Parameters
    ----------
    Inp : pd.DataFrame
        Data whose columns are grouped.
    Dict : bool, default False
        If True, return a plain dict mapping dtype name -> list of column
        names; otherwise return a DataFrame with columns
        ['Data Type', 'Features', 'Count'].

    Returns
    -------
    pd.DataFrame or dict
    """
    # Sort columns by dtype so the groups come out in a deterministic order.
    Temp = Inp.dtypes.to_frame(name='Data Type').sort_values(by=['Data Type'])
    # Collect column names per dtype in a single pass instead of the original
    # O(groups * columns) boolean-mask assignments with the list-in-list cell hack.
    groups = {}
    for feature, dtype_ in zip(Temp.index, Temp['Data Type']):
        groups.setdefault(str(dtype_), []).append(feature)
    Out = pd.DataFrame({'Data Type': list(groups.keys()),
                        'Features': [groups[k] for k in groups],
                        'Count': [len(groups[k]) for k in groups]})
    if Dict:
        Out = dict(zip(Out['Data Type'], Out['Features']))
    return Out
# Visualize completeness of the raw dataset and group its columns by dtype.
_ = Data_Plot(Data, Title = 'Statlog Dataset', W = 800)
# dType maps dtype name -> list of columns, e.g. {'object': [...], 'int64': [...]}.
dType = dtypes_group(Data, Dict = True)
| Age | Sex | Job | Housing | Saving accounts | Checking account | Credit amount | Duration | Purpose | Risk | |
|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 67 | male | 2 | own | NaN | little | 1169 | 6 | radio/TV | good |
| 1 | 22 | female | 2 | own | little | moderate | 5951 | 48 | radio/TV | bad |
| 2 | 49 | male | 1 | own | little | NaN | 2096 | 12 | education | good |
| 3 | 45 | male | 2 | free | little | little | 7882 | 42 | furniture/equipment | good |
| 4 | 53 | male | 2 | free | little | little | 4870 | 24 | car | bad |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 995 | 31 | female | 1 | own | little | NaN | 1736 | 12 | furniture/equipment | good |
| 996 | 40 | male | 3 | own | little | little | 3857 | 30 | car | good |
| 997 | 38 | male | 2 | own | little | NaN | 804 | 12 | radio/TV | good |
| 998 | 23 | male | 2 | free | little | little | 1845 | 45 | radio/TV | bad |
| 999 | 27 | male | 2 | own | moderate | moderate | 4576 | 45 | car | good |
1000 rows × 10 columns
Dealing with NaN values.
## Categorical Data
# Fill NaN values in the categorical (object) columns with the literal string
# 'None' — a real category here (e.g. "no checking account") — then title-case.
Data[dType['object']] = Data[dType['object']].fillna('None')
Data[dType['object']] = Data[dType['object']].applymap(lambda x: x.title())
# Restore the 'TV' acronym that title-casing turned into 'Tv'.
for Feat in dType['object']:
    Data[Feat] = Data[Feat].str.replace('Tv','TV')
# Title-case the column names too, then persist the cleaned dataset.
Data.columns = [x.title() for x in Data.columns]
_ = Data_Plot(Data, Title = 'Statlog Dataset', W = 800)
Data.to_csv (Path.split(".")[0]+'_Mod.csv', index = None, header=True)
# Working copy of the cleaned dataset; encoding below is applied to df, not Data.
df = Data.copy()
# Records, per feature, either the bin edges or the category -> code mapping used.
Feat_Dict = {}
def Distinct_Observations(Inp, Target = Target, Featured_Columns = None, YL = None):
    """Bar-plot the number of distinct values in each selected column of
    ``Inp`` and return the per-column counts.

    Parameters
    ----------
    Inp : pd.DataFrame
        Data to inspect.
    Target : str
        Column excluded from the default feature list.
    Featured_Columns : list of str, optional
        Columns to count; defaults to every column of ``Inp`` except ``Target``.
    YL : number, optional
        Upper y-axis limit; plotly autoscales when None.

    Returns
    -------
    pd.Series
        Column name -> number of unique values.
    """
    # FIX: was `if not Featured_Columns == None:`-style equality check, and the
    # default column list was derived from the global `Data` instead of `Inp`
    # (it only worked because df happens to be a copy of Data).
    if Featured_Columns is None:
        Featured_Columns = list(set(Inp.columns) - {Target})
    Temp = Inp[Featured_Columns].nunique()
    fig = go.Figure([go.Bar(x=Temp.index, y=Temp.values)])
    fig.update_traces(marker_line_color= 'Navy', marker_line_width=1, opacity=1, showlegend = False)
    fig.update_layout(legend_orientation='v', plot_bgcolor= 'white', height= 450, width= 980,
                      title={'text': '<b>' + 'Distinct Observations in Each Column' + '<b>', 'x':0.5,
                             'y': 0.92, 'xanchor': 'center', 'yanchor': 'top'},
                      yaxis_title='Frequency')
    fig.update_xaxes(showline=True, linewidth=1, linecolor='Lightgray', mirror=True,
                     zeroline=False, zerolinewidth=1, zerolinecolor='Black',
                     showgrid=False, gridwidth=1, gridcolor='Lightgray')
    fig.update_yaxes(showline=True, linewidth=1, linecolor='Lightgray', mirror=True,
                     zeroline=True, zerolinewidth=1, zerolinecolor='Black',
                     showgrid=True, gridwidth=1, gridcolor='Lightgray')
    # FIX: was `if not YL == None:`.
    if YL is not None:
        fig.update_yaxes(range =[0, YL])
    fig.show()
    return Temp
# Unique-value counts for every feature of the working copy.
Table = Distinct_Observations(df, Featured_Columns = None, YL = 1000)
def FeatBins(Inp, Bins, replace = True, String = True):
    """Discretize the numeric series ``Inp`` into the half-open intervals
    defined by consecutive ``Bins`` edges.

    Parameters
    ----------
    Inp : pd.Series
        Numeric values to bin.
    Bins : sequence of numbers
        Interval edges; coerced to int.
    replace : bool, default True
        If True, replace each interval label with its ordinal code
        (0, 1, ... following the lexicographic order of the labels).
    String : bool, default True
        If True, return string labels; a leading '(-1' of the first interval
        is rewritten to '[0' so it reads as inclusive of 0.

    Returns
    -------
    pd.Series
        Interval labels, ordinal codes, or their string forms.
    """
    Bins = [int(x) for x in Bins]
    Out = pd.cut(Inp, bins = pd.IntervalIndex.from_tuples([(x, y) for x, y in zip(Bins[:-1],Bins[1:])]))
    # Map sorted interval labels to ordinal codes 0..k-1.
    Temp = np.sort(Out.astype('str').unique())
    Dict = dict(zip(Temp, np.arange(len(Temp))))
    if replace:
        Out = Out.astype('str').replace(Dict)
    if String:
        Out = Out.astype('str')
        # FIX: the original `str.replace(pat='(-1', ...)` treated the pattern as a
        # regex, always raised on the unbalanced '(' and was silently swallowed by
        # a bare `except: pass` — so the rewrite never happened. Use a literal
        # replacement instead.
        Out = Out.str.replace('(-1', '[0', regex=False)
    return Out
def BinPlot(Feat, Bins, xLim, Inp = Data, W = 600, TableColors = ['Navy','White']):
    """Visualize how column ``Feat`` is binned: a number line of the bin edges
    (top subplot) and a table of per-bin counts and percentages (bottom).

    Parameters
    ----------
    Feat : str
        Column of ``Inp`` to bin.
    Bins : np.ndarray
        Bin edges (a numeric array is expected: ``0*Bins`` below relies on it).
    xLim : list
        x-axis range for the number line.
    Inp : pd.DataFrame
        Source data; defaults to the global ``Data`` (bound at definition time).
    W : int
        Figure width in pixels.
    TableColors : list
        [header fill color, cell fill color] for the table.
    """
    # NOTE(review): mutable default argument (TableColors) — harmless here since
    # it is never mutated, but a tuple would be safer.
    fig = make_subplots(rows=2, cols=1, vertical_spacing = 0.1, row_heights=[.1, .9],
                        specs=[[{"type": "xy"}], [{"type": "table"}]])
    # Top: bin edges drawn as markers on a horizontal line at y = 0,
    # each edge with a random CSS color.
    fig.add_trace(go.Scatter(x= Bins, y=0*Bins, mode='lines+markers', name='Intervals',
                             line = dict(color='SkyBlue', width= 10),
                             marker=dict(size=15*np.ones(len(Bins)), opacity=1,
                                         color=np.random.choice(list(mcolors.CSS4_COLORS.keys()), size = len(Bins)))), row=1, col=1)
    fig.update_xaxes(showline=True, linewidth=1, linecolor='Lightgray', mirror=True,
                     showgrid=True, gridwidth=1, gridcolor='Lightgray',
                     tickvals=Bins, range=xLim, row=1, col=1)
    fig.update_yaxes(showline=True, linewidth=1, linecolor='Lightgray', mirror=True,
                     showticklabels=False, range= [-.2, .2], row=1, col=1)
    fig.update_traces(marker_line_color= 'Black', marker_line_width=1.5, opacity=1, row=1, col=1)
    fig.update_layout(plot_bgcolor= 'white', barmode='stack', width = W)
    # Bottom: per-interval counts and percentage of total.
    Bins = [int(x) for x in Bins]
    Table = pd.cut(Inp[Feat], bins = pd.IntervalIndex.from_tuples([(x, y) for x, y in zip(Bins[:-1],Bins[1:])]))
    # NOTE(review): in pandas >= 2.0, value_counts().reset_index() already names the
    # first column after the series — confirm this rename still behaves as intended.
    Table = Table.value_counts().to_frame('Count').reset_index().rename(columns = {'index':Feat})
    Table = Table.sort_values(by = [Feat])
    Table[Feat] = Table[Feat].astype(str)
    Table['Percentage'] = np.round(100*Table['Count']/Table['Count'].sum(), 2)
    # Format percentages with two decimals for display only.
    T = Table.copy()
    T['Percentage'] = T['Percentage'].map(lambda x: '%.2f' % x)
    # go.Table expects cell values column-wise.
    Temp = []
    for i in T.columns:
        Temp.append(T.loc[:,i].values)
    fig.add_trace(go.Table(header=dict(values = list(Table.columns), line_color='darkslategray',
                                       fill_color= TableColors[0], align=['center','center'],
                                       font= dict(color='white', size=12), height=25),
                           columnwidth = [0.4, 0.2, 0.2],
                           cells=dict(values=Temp, line_color='darkslategray',
                                      fill=dict(color= [TableColors[1], TableColors[1]]),
                                      align=['center', 'center'], font_size=12, height=20)), row=2, col=1)
    fig.update_layout(title={'text': '<b>' + Feat + '<b>', 'x':0.5,
                             'y':0.90, 'xanchor': 'center', 'yanchor': 'top'})
    fig.show()

# ---- Age: discretize into age ranges and store the bin edges ----
Feat = 'Age'
Bins = np.array([15, 30, 45, 65, 85], dtype = int)
Feat_Dict.update({Feat:Bins})
df[Feat] = FeatBins(Inp = Data[Feat], Bins = Bins, replace = True)
BinPlot(Feat, Bins, xLim = [0, 100])
del Feat, Bins
# ---- Checking Account: ordinal encoding ('None' is the filled-NaN category) ----
Feat = 'Checking Account'
Feat_Dict.update({Feat: {'None':0, 'Little': 1, 'Moderate':2, 'Rich':3}})
df[Feat] = df[Feat].replace(Feat_Dict[Feat]).astype(int)
del Feat
# ---- Credit Amount: bin edges recorded; the binning itself is left disabled ----
Feat = 'Credit Amount'
Bins = np.array([0, 1e3, 5e3, 1e4, 2e4], dtype = int)
Feat_Dict.update({Feat:Bins})
# df[Feat] = FeatBins(Inp = Data[Feat], Bins = Bins, replace = True)
# BinPlot(Feat, Bins, xLim = [-1e3, 21e3])
# del Feat, Bins
# ---- Duration: discretize into month ranges ----
Feat = 'Duration'
Bins = np.array([0, 6, 12, 18, 24, 80], dtype = int)
Feat_Dict.update({Feat:Bins})
df[Feat] = FeatBins(Inp = Data[Feat], Bins = Bins, replace = True)
BinPlot(Feat, Bins, xLim = [-5, 85])
del Feat, Bins
# ---- Housing: ordinal encoding ----
Feat = 'Housing'
Feat_Dict.update({Feat: {'Free':0, 'Rent': 1, 'Own':2}})
df[Feat] = df[Feat].replace(Feat_Dict[Feat]).astype(int)
del Feat
# ---- Job: already numeric; record only what the codes mean ----
Feat_Dict.update({'Job':{0:'Unskilled and Non-Resident', 1:'Unskilled and Resident', 2:'Skilled', 3:'Highly Skilled'}})
# ---- Purpose: one-hot encode (drop the original column, append dummies) ----
Temp = df.drop(columns = ['Purpose'])
df = pd.concat([Temp, pd.get_dummies(df['Purpose'])], axis = 1)
del Temp
# ---- Risk (the target): binary encoding ----
Feat = 'Risk'
Feat_Dict.update({Feat: {'Bad':0, 'Good':1}})
df[Feat] = df[Feat].replace(Feat_Dict[Feat]).astype(int)
del Feat
# ---- Saving Accounts: ordinal encoding ----
Feat = 'Saving Accounts'
Feat_Dict.update({Feat: {'None':0, 'Little': 1, 'Moderate':2, 'Quite Rich':3, 'Rich':4}})
df[Feat] = df[Feat].replace(Feat_Dict[Feat]).astype(int)
del Feat
# ---- Sex: binary encoding ----
Feat = 'Sex'
Feat_Dict.update({Feat: {'Female':0, 'Male':1}})
df[Feat] = df[Feat].replace(Feat_Dict[Feat]).astype(int)
del Feat
# Persist the encoding dictionary and the fully numeric dataset.
with open(Path.split(".")[0] + '_Feat_Dict.pkl', 'wb') as fp:
    pickle.dump(Feat_Dict, fp, protocol=pickle.HIGHEST_PROTOCOL)
df.to_csv (Path.split(".")[0]+'_Num.csv', index = None, header=True)
Moreover, large differences in variance across features can hurt the modeling process. For this reason, we standardize the features by removing the mean and scaling them to unit variance.
def Feature_Normalize(Inp, Target = Target, FS = (12, 8), annot_kws = 12):
    """Standardize every feature column of ``Inp`` to zero mean / unit
    variance (the ``Target`` column is left untouched) and plot the feature
    variances before and after scaling.

    Parameters
    ----------
    Inp : pd.DataFrame
        Numeric dataset including the target column.
    Target : str
        Column excluded from scaling.
    FS : tuple
        Figure size for the two variance heatmaps.
    annot_kws : int
        Annotation font size for the heatmaps.

    Returns
    -------
    pd.DataFrame
        Copy of ``Inp`` with every non-target column standardized.
    """
    X = Inp.drop(columns = [Target])
    # Scale features to zero mean and unit variance.
    scaler = preprocessing.StandardScaler()
    # FIX: rebuild the frame with X's own index. The previous default RangeIndex
    # silently misaligns (producing NaNs) in the assignment below whenever
    # Inp's index is not exactly 0..n-1.
    X_std = pd.DataFrame(data = scaler.fit_transform(X), columns = X.columns, index = X.index)
    # Two heatmaps: raw variances (top) vs standardized variances (bottom).
    fig, ax = plt.subplots(2, 1, figsize=FS)
    ax = ax.ravel()
    CP = [sns.color_palette("OrRd", 20), sns.color_palette("Greens", X.shape[1])]
    Names = ['Variance of the Features', 'Variance of the Features (Standardized)']
    Sets = [X, X_std]
    kws = dict(label='Feature\nVariance', aspect=10, shrink= .3)
    for i in range(len(ax)):
        Temp = Sets[i].var().sort_values(ascending = False).to_frame(name= 'Variance').round(2).T
        _ = sns.heatmap(Temp, ax=ax[i], annot=True, square=True, cmap = CP[i],
                        linewidths = 0.8, vmin=0, vmax=Temp.max(axis =1)[0], annot_kws={"size": annot_kws},
                        cbar_kws=kws)
        _ = ax[i].set_yticklabels('')
        _ = ax[i].set_title(Names[i], weight='bold', fontsize = 14)
        del Temp
    # Write the scaled features back into a copy of the full frame.
    Out = Inp.copy()
    Out[X.columns] = X_std.copy()
    return Out
# Standardize the numeric dataset (target column 'Risk' stays unscaled) and save it.
df = Feature_Normalize(df, FS = (14, 8), annot_kws = 12)
df.to_csv (Path.split(".")[0]+'_STD.csv', index = None, header=True)
def Correlation_Plot (Inp, Fig_Size = 12, annot_kws = 11):
    """Draw the lower triangle (diagonal included) of the pairwise correlation
    matrix of ``Inp`` as an annotated heatmap and return the rounded matrix.

    Parameters
    ----------
    Inp : pd.DataFrame
        Data whose column correlations are plotted.
    Fig_Size : int
        Side length of the square figure.
    annot_kws : int
        Annotation font size.

    Returns
    -------
    pd.DataFrame
        Correlation matrix rounded to two decimals.
    """
    corr_matrix = Inp.corr().round(2)
    # Mask the strictly upper triangle; the diagonal remains visible.
    hide = np.zeros_like(corr_matrix)
    hide[np.triu_indices_from(hide, k=1)] = True
    fig, ax = plt.subplots(figsize=(Fig_Size, Fig_Size))
    sns.heatmap(corr_matrix, ax=ax, mask=hide, annot=True, square=True,
                cmap =sns.color_palette("Greens", n_colors=10), linewidths = 0.2, vmin=0, vmax=1,
                cbar_kws={'label': 'Correlation', "aspect":30, "shrink": .4}, annot_kws={"size": annot_kws})
    return corr_matrix
def Feature_Correlation(Inp, Target = Target, annot_kws = 11, FS = (17, 16), rep_long_labels = False):
    """One-row heatmap of each feature's correlation with ``Target``, sorted
    ascending by correlation value.

    Parameters
    ----------
    Inp : pd.DataFrame
        Numeric dataset including the target column.
    Target : str
        Column against which correlations are computed.
    annot_kws : int
        Annotation font size.
    FS : tuple
        Figure size.
    rep_long_labels : bool
        If True, wrap long x tick labels onto multiple lines.
    """
    Fig, ax = plt.subplots(figsize = FS)
    Temp = Inp.corr().round(2)
    # Keep only the Target row, drop its self-correlation, and sort the
    # remaining columns by their correlation with Target.
    Temp = Temp.loc[(Temp.index == Target)].drop(columns = Target).T.sort_values(by = Target).T
    # NOTE(review): vmin=0 clips negative correlations to the lowest color — confirm intended.
    _ = sns.heatmap(Temp, ax=ax, annot=True, square=True, cmap =sns.color_palette("YlGn", n_colors=Temp.shape[1]),
                    linewidths = 0.8, vmin=0, vmax=1,
                    annot_kws={"size": annot_kws},
                    cbar_kws={'label': 'Correlation with %s' % Target,
                              "aspect":40, "shrink": .4, "orientation": "horizontal"})
    if rep_long_labels:
        # Break multi-word labels onto separate lines, keeping "X of\nY" together.
        labels = [x.replace(' ','\n').replace('\nof\n',' of\n')
                  for x in [item.get_text() for item in ax.get_xticklabels()]]
        _ = ax.set_xticklabels(labels)
    _ = ax.set_yticklabels('')
# Full correlation heatmap of the processed dataset.
_ = Correlation_Plot (df, 12, annot_kws = 11)
# Feature_Correlation(df, annot_kws = 11, FS = (12, 16), rep_long_labels = True)